uint32_t runq_sort_last;
struct timer ticker;
unsigned int tick;
+ unsigned int idle_bias;
};
/*
init_timer(&spc->ticker, csched_tick, (void *)(unsigned long)cpu, cpu);
INIT_LIST_HEAD(&spc->runq);
spc->runq_sort_last = csched_priv.runq_sort;
+ spc->idle_bias = NR_CPUS - 1;
per_cpu(schedule_data, cpu).sched_priv = spc;
/* Start off idling... */
}
static int
-csched_cpu_pick(struct vcpu *vc)
+_csched_cpu_pick(struct vcpu *vc, bool_t commit)
{
cpumask_t cpus;
cpumask_t idlers;
if ( ( (weight_cpu < weight_nxt) ^ sched_smt_power_savings )
&& (weight_cpu != weight_nxt) )
{
- cpu = nxt;
- cpu_clear(cpu, cpus);
+ cpu = cycle_cpu(CSCHED_PCPU(nxt)->idle_bias, nxt_idlers);
+ if ( commit )
+ CSCHED_PCPU(nxt)->idle_bias = cpu;
+ cpus_andnot(cpus, cpus, per_cpu(cpu_sibling_map, cpu));
}
else
{
return cpu;
}
+/*
+ * Committing wrapper around _csched_cpu_pick(): passes commit=1 so the
+ * chosen pcpu's idle_bias is updated.  Callers that only want to probe
+ * (e.g. the migrate check in vcpu accounting) call _csched_cpu_pick()
+ * with commit=0 directly, leaving idle_bias untouched.
+ */
+static int
+csched_cpu_pick(struct vcpu *vc)
+{
+ return _csched_cpu_pick(vc, 1);
+}
+
static inline void
__csched_vcpu_acct_start(struct csched_vcpu *svc)
{
{
__csched_vcpu_acct_start(svc);
}
- else if ( csched_cpu_pick(current) != cpu )
+ else if ( _csched_cpu_pick(current, 0) != cpu )
{
CSCHED_VCPU_STAT_CRANK(svc, migrate_r);
CSCHED_STAT_CRANK(migrate_running);